Skip to content

Commit 726f6e8

Browse files
authored
Enable gpu support in bench (#581)
1 parent a76bf80 commit 726f6e8

File tree

1 file changed: +2 additions, −2 deletions

llama.cpp/llama-bench/llama-bench.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -450,6 +450,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
                 invalid_param = true;
                 break;
             }
+            FLAG_gpu = LLAMAFILE_GPU_AUTO;
             auto p = split<int>(argv[i], split_delim);
             params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
         } else if (arg == "-sm" || arg == "--split-mode") {
@@ -1357,8 +1358,6 @@ __attribute__((__constructor__(101))) static void init(void) {
 }

 int main(int argc, char ** argv) {
-    FLAG_gpu = LLAMAFILE_GPU_DISABLE; // [jart]
-
     ShowCrashReports();

     // try to set locale for unicode characters in markdown
@@ -1382,6 +1381,7 @@ int main(int argc, char ** argv) {
 #endif

     cmd_params params = parse_cmd_params(argc, argv);
+    FLAGS_READY = true;

     // initialize llama.cpp
     if (!params.verbose) {

0 commit comments

Comments (0)